include $(BASEDIR)/arch/$(TARGET_ARCH)/Rules.mk
-CFLAGS += -g
+CFLAGS += -g -D__XEN__
ifneq ($(debug),y)
CFLAGS += -DNDEBUG
vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
-/* turn off temporarily, we will merge hypercall parameter convention with xeno, when
- VTI domain need to call hypercall */
-#if 0
-unsigned long __hypercall_create_continuation(
- unsigned int op, unsigned int nr_args, ...)
-{
- struct mc_state *mcs = &mc_state[smp_processor_id()];
- VCPU *vcpu = current;
- struct cpu_user_regs *regs = vcpu_regs(vcpu);
- unsigned int i;
- va_list args;
-
- va_start(args, nr_args);
- if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
- panic("PREEMPT happen in multicall\n"); // Not support yet
- } else {
- vcpu_set_gr(vcpu, 15, op, 0);
- for ( i = 0; i < nr_args; i++) {
- switch (i) {
- case 0: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
- break;
- case 1: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
- break;
- case 2: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
- break;
- case 3: vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
- break;
- case 4: vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
- break;
- default: panic("Too many args for hypercall continuation\n");
- break;
- }
- }
- }
- vcpu->arch.hypercall_continuation = 1;
- va_end(args);
- return op;
-}
-#endif
+
void hyper_dom_mem_op(void)
{
VCPU *vcpu=current;
reflect_interruption(isr,regs,vector);
}
-unsigned long __hypercall_create_continuation(
- unsigned int op, unsigned int nr_args, ...)
+unsigned long hypercall_create_continuation(
+ unsigned int op, const char *format, ...)
{
struct mc_state *mcs = &mc_state[smp_processor_id()];
VCPU *vcpu = current;
struct cpu_user_regs *regs = vcpu_regs(vcpu);
+ const char *p = format;
+ unsigned long arg;
unsigned int i;
va_list args;
- va_start(args, nr_args);
+ va_start(args, format);
if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
panic("PREEMPT happen in multicall\n"); // Not support yet
} else {
vcpu_set_gr(vcpu, 2, op, 0);
- for ( i = 0; i < nr_args; i++) {
+ for ( i = 0; *p != '\0'; i++) {
+ switch ( *p++ )
+ {
+ case 'i':
+ arg = (unsigned long)va_arg(args, unsigned int);
+ break;
+ case 'l':
+ arg = (unsigned long)va_arg(args, unsigned long);
+ break;
+ case 'p':
+ case 'h':
+ arg = (unsigned long)va_arg(args, void *);
+ break;
+ default:
+ arg = 0;
+ BUG();
+ }
switch (i) {
- case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long), 0);
+ case 0: vcpu_set_gr(vcpu, 14, arg, 0);
break;
- case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long), 0);
+ case 1: vcpu_set_gr(vcpu, 15, arg, 0);
break;
- case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
+ case 2: vcpu_set_gr(vcpu, 16, arg, 0);
break;
- case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
+ case 3: vcpu_set_gr(vcpu, 17, arg, 0);
break;
- case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
+ case 4: vcpu_set_gr(vcpu, 18, arg, 0);
break;
default: panic("Too many args for hypercall continuation\n");
break;
flush_tlb_mask(v->vcpu_dirty_cpumask);
}
-unsigned long __hypercall_create_continuation(
- unsigned int op, unsigned int nr_args, ...)
+#define next_arg(fmt, args) ({ \
+ unsigned long __arg; \
+ switch ( *(fmt)++ ) \
+ { \
+ case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \
+ case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \
+ case 'p': __arg = (unsigned long)va_arg(args, void *); break; \
+ case 'h': __arg = (unsigned long)va_arg(args, void *); break; \
+ default: __arg = 0; BUG(); \
+ } \
+ __arg; \
+})
+
+unsigned long hypercall_create_continuation(
+ unsigned int op, const char *format, ...)
{
struct mc_state *mcs = &mc_state[smp_processor_id()];
struct cpu_user_regs *regs;
+ const char *p = format;
+ unsigned long arg;
unsigned int i;
va_list args;
- va_start(args, nr_args);
+ va_start(args, format);
if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
{
__set_bit(_MCSF_call_preempted, &mcs->flags);
- for ( i = 0; i < nr_args; i++ )
- mcs->call.args[i] = va_arg(args, unsigned long);
+ for ( i = 0; *p != '\0'; i++ )
+ mcs->call.args[i] = next_arg(p, args);
}
else
{
else
regs->eip -= 2; /* re-execute 'int 0x82' */
- for ( i = 0; i < nr_args; i++ )
+ for ( i = 0; *p != '\0'; i++ )
{
+ arg = next_arg(p, args);
switch ( i )
{
- case 0: regs->ebx = va_arg(args, unsigned long); break;
- case 1: regs->ecx = va_arg(args, unsigned long); break;
- case 2: regs->edx = va_arg(args, unsigned long); break;
- case 3: regs->esi = va_arg(args, unsigned long); break;
- case 4: regs->edi = va_arg(args, unsigned long); break;
- case 5: regs->ebp = va_arg(args, unsigned long); break;
+ case 0: regs->ebx = arg; break;
+ case 1: regs->ecx = arg; break;
+ case 2: regs->edx = arg; break;
+ case 3: regs->esi = arg; break;
+ case 4: regs->edi = arg; break;
+ case 5: regs->ebp = arg; break;
}
}
#elif defined(__x86_64__)
regs->rax = op;
regs->rip -= 2; /* re-execute 'syscall' */
- for ( i = 0; i < nr_args; i++ )
+ for ( i = 0; *p != '\0'; i++ )
{
+ arg = next_arg(p, args);
switch ( i )
{
- case 0: regs->rdi = va_arg(args, unsigned long); break;
- case 1: regs->rsi = va_arg(args, unsigned long); break;
- case 2: regs->rdx = va_arg(args, unsigned long); break;
- case 3: regs->r10 = va_arg(args, unsigned long); break;
- case 4: regs->r8 = va_arg(args, unsigned long); break;
- case 5: regs->r9 = va_arg(args, unsigned long); break;
+ case 0: regs->rdi = arg; break;
+ case 1: regs->rsi = arg; break;
+ case 2: regs->rdx = arg; break;
+ case 3: regs->r10 = arg; break;
+ case 4: regs->r8 = arg; break;
+ case 5: regs->r9 = arg; break;
}
}
#endif
#include <xen/domain_page.h>
#include <xen/event.h>
#include <xen/iocap.h>
+#include <xen/guest_access.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/ldt.h>
#include <asm/x86_emulate.h>
#include <public/memory.h>
{
if ( hypercall_preempt_check() )
{
- rc = hypercall4_create_continuation(
- __HYPERVISOR_mmuext_op, uops,
- (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_mmuext_op, "pipi",
+ uops, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
break;
}
{
if ( hypercall_preempt_check() )
{
- rc = hypercall4_create_continuation(
- __HYPERVISOR_mmu_update, ureqs,
- (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_mmu_update, "pipi",
+ ureqs, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
break;
}
}
-long arch_memory_op(int op, void *arg)
+long arch_memory_op(int op, GUEST_HANDLE(void) arg)
{
struct xen_reserved_phys_area xrpa;
unsigned long pfn;
switch ( op )
{
case XENMEM_reserved_phys_area:
- if ( copy_from_user(&xrpa, arg, sizeof(xrpa)) )
+ if ( copy_from_guest(&xrpa, arg, 1) )
return -EFAULT;
/* No guest has more than one reserved area. */
put_domain(d);
- if ( copy_to_user(arg, &xrpa, sizeof(xrpa)) )
+ if ( copy_to_guest(arg, &xrpa, 1) )
return -EFAULT;
break;
{
if ( hypercall_preempt_check() )
{
- rc = hypercall1_create_continuation(
- __HYPERVISOR_set_trap_table, traps);
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_set_trap_table, "p", traps);
break;
}
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/sched.h>
+#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
}
}
-long subarch_memory_op(int op, void *arg)
+long subarch_memory_op(int op, GUEST_HANDLE(void) arg)
{
struct xen_machphys_mfn_list xmml;
unsigned long mfn;
switch ( op )
{
case XENMEM_machphys_mfn_list:
- if ( copy_from_user(&xmml, arg, sizeof(xmml)) )
+ if ( copy_from_guest(&xmml, arg, 1) )
return -EFAULT;
max = min_t(unsigned int, xmml.max_extents, mpt_size >> 21);
{
mfn = l2e_get_pfn(idle_pg_table_l2[l2_linear_offset(
RDWR_MPT_VIRT_START + (i << 21))]) + l1_table_offset(i << 21);
- if ( put_user(mfn, &xmml.extent_start[i]) )
+ if ( copy_to_guest_offset(xmml.extent_start, i, &mfn, 1) )
return -EFAULT;
}
- if ( put_user(i, &((struct xen_machphys_mfn_list *)arg)->nr_extents) )
+ xmml.nr_extents = i;
+ if ( copy_to_guest(arg, &xmml, 1) )
return -EFAULT;
break;
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/sched.h>
+#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/asm_defns.h>
#include <asm/page.h>
}
}
-long subarch_memory_op(int op, void *arg)
+long subarch_memory_op(int op, GUEST_HANDLE(void) arg)
{
struct xen_machphys_mfn_list xmml;
l3_pgentry_t l3e;
switch ( op )
{
case XENMEM_machphys_mfn_list:
- if ( copy_from_user(&xmml, arg, sizeof(xmml)) )
+ if ( copy_from_guest(&xmml, arg, 1) )
return -EFAULT;
for ( i = 0, v = RDWR_MPT_VIRT_START;
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
break;
mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
- if ( put_user(mfn, &xmml.extent_start[i]) )
+ if ( copy_to_guest_offset(xmml.extent_start, i, &mfn, 1) )
return -EFAULT;
}
- if ( put_user(i, &((struct xen_machphys_mfn_list *)arg)->nr_extents) )
+ xmml.nr_extents = i;
+ if ( copy_to_guest(arg, &xmml, 1) )
return -EFAULT;
break;
#include <xen/event.h>
#include <xen/shadow.h>
#include <xen/iocap.h>
+#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/hardirq.h>
#include <public/memory.h>
static long
increase_reservation(
struct domain *d,
- unsigned long *extent_list,
+ GUEST_HANDLE(xen_ulong) extent_list,
unsigned int nr_extents,
unsigned int extent_order,
unsigned int flags,
struct page_info *page;
unsigned long i, mfn;
- if ( (extent_list != NULL) &&
- !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+ if ( !guest_handle_is_null(extent_list) &&
+ !guest_handle_okay(extent_list, nr_extents) )
return 0;
if ( (extent_order != 0) &&
}
/* Inform the domain of the new page's machine address. */
- if ( extent_list != NULL )
+ if ( !guest_handle_is_null(extent_list) )
{
mfn = page_to_mfn(page);
- if ( unlikely(__copy_to_user(&extent_list[i], &mfn, sizeof(mfn))) )
+ if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
return i;
}
}
static long
populate_physmap(
struct domain *d,
- unsigned long *extent_list,
- unsigned int nr_extents,
- unsigned int extent_order,
- unsigned int flags,
- int *preempted)
+ GUEST_HANDLE(xen_ulong) extent_list,
+ unsigned int nr_extents,
+ unsigned int extent_order,
+ unsigned int flags,
+ int *preempted)
{
struct page_info *page;
unsigned long i, j, gpfn, mfn;
- if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+ if ( !guest_handle_okay(extent_list, nr_extents) )
return 0;
if ( (extent_order != 0) &&
goto out;
}
- if ( unlikely(__copy_from_user(&gpfn, &extent_list[i], sizeof(gpfn))) )
+ if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) )
goto out;
if ( unlikely((page = alloc_domheap_pages(
set_gpfn_from_mfn(mfn + j, gpfn + j);
/* Inform the domain of the new page's machine address. */
- if ( unlikely(__copy_to_user(&extent_list[i], &mfn, sizeof(mfn))) )
+ if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
goto out;
}
}
static long
decrease_reservation(
- struct domain *d,
- unsigned long *extent_list,
+ struct domain *d,
+ GUEST_HANDLE(xen_ulong) extent_list,
unsigned int nr_extents,
unsigned int extent_order,
unsigned int flags,
struct page_info *page;
unsigned long i, j, gmfn, mfn;
- if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+ if ( !guest_handle_okay(extent_list, nr_extents) )
return 0;
for ( i = 0; i < nr_extents; i++ )
return i;
}
- if ( unlikely(__copy_from_user(&gmfn, &extent_list[i], sizeof(gmfn))) )
+ if ( unlikely(__copy_from_guest_offset(&gmfn, extent_list, i, 1)) )
return i;
for ( j = 0; j < (1 << extent_order); j++ )
static long
translate_gpfn_list(
- struct xen_translate_gpfn_list *uop, unsigned long *progress)
+ GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
{
struct xen_translate_gpfn_list op;
unsigned long i, gpfn, mfn;
struct domain *d;
- if ( copy_from_user(&op, uop, sizeof(op)) )
+ if ( copy_from_guest(&op, uop, 1) )
return -EFAULT;
/* Is size too large for us to encode a continuation? */
if ( op.nr_gpfns > (ULONG_MAX >> START_EXTENT_SHIFT) )
return -EINVAL;
- if ( !array_access_ok(op.gpfn_list, op.nr_gpfns, sizeof(*op.gpfn_list)) ||
- !array_access_ok(op.mfn_list, op.nr_gpfns, sizeof(*op.mfn_list)) )
+ if ( !guest_handle_okay(op.gpfn_list, op.nr_gpfns) ||
+ !guest_handle_okay(op.mfn_list, op.nr_gpfns) )
return -EFAULT;
if ( op.domid == DOMID_SELF )
return -EAGAIN;
}
- if ( unlikely(__copy_from_user(&gpfn, &op.gpfn_list[i],
- sizeof(gpfn))) )
+ if ( unlikely(__copy_from_guest_offset(&gpfn, op.gpfn_list, i, 1)) )
{
put_domain(d);
return -EFAULT;
mfn = gmfn_to_mfn(d, gpfn);
- if ( unlikely(__copy_to_user(&op.mfn_list[i], &mfn,
- sizeof(mfn))) )
+ if ( unlikely(__copy_to_guest_offset(op.mfn_list, i, &mfn, 1)) )
{
put_domain(d);
return -EFAULT;
return 0;
}
-long do_memory_op(unsigned long cmd, void *arg)
+long do_memory_op(unsigned long cmd, GUEST_HANDLE(void) arg)
{
struct domain *d;
int rc, op, flags = 0, preempted = 0;
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap:
- if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
+ if ( copy_from_guest(&reservation, arg, 1) )
return -EFAULT;
/* Is size too large for us to encode a continuation? */
start_extent = cmd >> START_EXTENT_SHIFT;
if ( unlikely(start_extent > reservation.nr_extents) )
return -EINVAL;
-
- if ( reservation.extent_start != NULL )
- reservation.extent_start += start_extent;
+
+ if ( !guest_handle_is_null(reservation.extent_start) )
+ guest_handle_add_offset(reservation.extent_start, start_extent);
reservation.nr_extents -= start_extent;
if ( (reservation.address_bits != 0) &&
rc += start_extent;
if ( preempted )
- return hypercall2_create_continuation(
- __HYPERVISOR_memory_op, op | (rc << START_EXTENT_SHIFT), arg);
+ return hypercall_create_continuation(
+ __HYPERVISOR_memory_op, "lh",
+ op | (rc << START_EXTENT_SHIFT), arg);
break;
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
- if ( copy_from_user(&domid, (domid_t *)arg, sizeof(domid)) )
+ if ( copy_from_guest(&domid, arg, 1) )
return -EFAULT;
- if ( likely((domid = (unsigned long)arg) == DOMID_SELF) )
+ if ( likely(domid == DOMID_SELF) )
d = current->domain;
else if ( !IS_PRIV(current->domain) )
return -EPERM;
case XENMEM_translate_gpfn_list:
progress = cmd >> START_EXTENT_SHIFT;
- rc = translate_gpfn_list(arg, &progress);
+ rc = translate_gpfn_list(
+ guest_handle_cast(arg, xen_translate_gpfn_list_t),
+ &progress);
if ( rc == -EAGAIN )
- return hypercall2_create_continuation(
- __HYPERVISOR_memory_op,
- op | (progress << START_EXTENT_SHIFT),
- arg);
+ return hypercall_create_continuation(
+ __HYPERVISOR_memory_op, "lh",
+ op | (progress << START_EXTENT_SHIFT), arg);
break;
default:
if ( i < nr_calls )
{
mcs->flags = 0;
- return hypercall2_create_continuation(
- __HYPERVISOR_multicall, &call_list[i], nr_calls-i);
+ return hypercall_create_continuation(
+ __HYPERVISOR_multicall, "pi", &call_list[i], nr_calls-i);
}
}
}
}
if ( hypercall_preempt_check() )
- return hypercall3_create_continuation(
- __HYPERVISOR_console_io, CONSOLEIO_write, count, buffer);
+ return hypercall_create_continuation(
+ __HYPERVISOR_console_io, "iip",
+ CONSOLEIO_write, count, buffer);
kcount = min_t(int, count, sizeof(kbuf)-1);
if ( copy_from_user(kbuf, buffer, kcount) )
int __sync_lazy_execstate(void);
/* Arch-specific portion of memory_op hypercall. */
-long arch_memory_op(int op, void *arg);
-long subarch_memory_op(int op, void *arg);
+long arch_memory_op(int op, GUEST_HANDLE(void) arg);
+long subarch_memory_op(int op, GUEST_HANDLE(void) arg);
#endif /* __ASM_X86_MM_H__ */
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
*/
- unsigned long *extent_start;
+ GUEST_HANDLE(xen_ulong) extent_start;
/* Number of extents, and size/alignment of each (2^extent_order pages). */
unsigned long nr_extents;
domid_t domid;
} xen_memory_reservation_t;
+DEFINE_GUEST_HANDLE(xen_memory_reservation_t);
/*
* Returns the maximum machine frame number of mapped RAM in this system.
* any large discontiguities in the machine address space, 2MB gaps in
* the machphys table will be represented by an MFN base of zero.
*/
- unsigned long *extent_start;
+ GUEST_HANDLE(xen_ulong) extent_start;
/*
* Number of extents written to the above array. This will be smaller
*/
unsigned int nr_extents;
} xen_machphys_mfn_list_t;
+DEFINE_GUEST_HANDLE(xen_machphys_mfn_list_t);
/*
* Returns the base and size of the specified reserved 'RAM hole' in the
/* Base and size of the specified reserved area. */
unsigned long first_gpfn, nr_gpfns;
} xen_reserved_phys_area_t;
+DEFINE_GUEST_HANDLE(xen_reserved_phys_area_t);
/*
* Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
unsigned long nr_gpfns;
/* List of GPFNs to translate. */
- unsigned long *gpfn_list;
+ GUEST_HANDLE(xen_ulong) gpfn_list;
/*
* Output list to contain MFN translations. May be the same as the input
* list (in which case each input GPFN is overwritten with the output MFN).
*/
- unsigned long *mfn_list;
+ GUEST_HANDLE(xen_ulong) mfn_list;
} xen_translate_gpfn_list_t;
+DEFINE_GUEST_HANDLE(xen_translate_gpfn_list_t);
#endif /* __XEN_PUBLIC_MEMORY_H__ */
#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__
+#ifdef __XEN__
+#define DEFINE_GUEST_HANDLE(type) struct __guest_handle_ ## type { type *p; }
+#define GUEST_HANDLE(type) struct __guest_handle_ ## type
+#else
+#define DEFINE_GUEST_HANDLE(type)
+#define GUEST_HANDLE(type) type *
+#endif
+
+#ifndef __ASSEMBLY__
+/* Guest handle for unsigned long pointer. Define a name with no whitespace. */
+typedef unsigned long xen_ulong;
+DEFINE_GUEST_HANDLE(xen_ulong);
+/* Guest handle for arbitrary-type pointer (void *). */
+DEFINE_GUEST_HANDLE(void);
+#endif
+
#if defined(__i386__)
#include "arch-x86_32.h"
#elif defined(__x86_64__)
--- /dev/null
+/******************************************************************************
+ * guest_access.h
+ *
+ * Copyright (c) 2006, K A Fraser
+ */
+
+#ifndef __XEN_GUEST_ACCESS_H__
+#define __XEN_GUEST_ACCESS_H__
+
+#include <asm/uaccess.h>
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd) ((hnd).p == NULL)
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr))
+
+/* Cast a guest handle to the specified type of handle. */
+#define guest_handle_cast(hnd, type) ({ \
+ type *_x = (hnd).p; \
+ (GUEST_HANDLE(type)) { _x }; \
+})
+
+/*
+ * Copy an array of objects to guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+})
+#define copy_to_guest(hnd, ptr, nr) \
+ copy_to_guest_offset(hnd, 0, ptr, nr)
+
+/*
+ * Copy an array of objects from guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+})
+#define copy_from_guest(ptr, hnd, nr) \
+ copy_from_guest_offset(ptr, hnd, 0, nr)
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+#define guest_handle_okay(hnd, nr) \
+ array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))
+
+#define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \
+})
+#define __copy_to_guest(hnd, ptr, nr) \
+ __copy_to_guest_offset(hnd, 0, ptr, nr)
+
+#define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \
+})
+#define __copy_from_guest(ptr, hnd, nr) \
+ __copy_from_guest_offset(ptr, hnd, 0, nr)
+
+#endif /* __XEN_GUEST_ACCESS_H__ */
void startup_cpu_idle_loop(void);
-unsigned long __hypercall_create_continuation(
- unsigned int op, unsigned int nr_args, ...);
-#define hypercall0_create_continuation(_op) \
- __hypercall_create_continuation((_op), 0)
-#define hypercall1_create_continuation(_op, _a1) \
- __hypercall_create_continuation((_op), 1, \
- (unsigned long)(_a1))
-#define hypercall2_create_continuation(_op, _a1, _a2) \
- __hypercall_create_continuation((_op), 2, \
- (unsigned long)(_a1), (unsigned long)(_a2))
-#define hypercall3_create_continuation(_op, _a1, _a2, _a3) \
- __hypercall_create_continuation((_op), 3, \
- (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3))
-#define hypercall4_create_continuation(_op, _a1, _a2, _a3, _a4) \
- __hypercall_create_continuation((_op), 4, \
- (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
- (unsigned long)(_a4))
-#define hypercall5_create_continuation(_op, _a1, _a2, _a3, _a4, _a5) \
- __hypercall_create_continuation((_op), 5, \
- (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
- (unsigned long)(_a4), (unsigned long)(_a5))
-#define hypercall6_create_continuation(_op, _a1, _a2, _a3, _a4, _a5, _a6) \
- __hypercall_create_continuation((_op), 6, \
- (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
- (unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))
+/*
+ * Creates a continuation to resume the current hypercall. The caller should
+ * return immediately, propagating the value returned from this invocation.
+ * The format string specifies the types and number of hypercall arguments.
+ * It contains one character per argument as follows:
+ * 'i' [unsigned] {char, int}
+ * 'l' [unsigned] long
+ * 'p' pointer (foo *)
+ * 'h' guest handle (GUEST_HANDLE(foo))
+ */
+unsigned long hypercall_create_continuation(
+ unsigned int op, const char *format, ...);
#define hypercall_preempt_check() (unlikely( \
softirq_pending(smp_processor_id()) | \